From: kaf24@scramble.cl.cam.ac.uk Date: Fri, 2 Apr 2004 17:08:47 +0000 (+0000) Subject: bitkeeper revision 1.837 (406d9e1fPZYe6CULYL8E7fCs3l-PlQ) X-Git-Tag: archive/raspbian/4.8.0-1+rpi1~1^2~18284 X-Git-Url: https://dgit.raspbian.org/%22http:/www.example.com/cgi/%22https:/%22bookmarks://%22Dat/%22http:/www.example.com/cgi/%22https:/%22bookmarks:/%22Dat?a=commitdiff_plain;h=360c648fb8178bbe59713e49c9dffa87d2f4005c;p=xen.git bitkeeper revision 1.837 (406d9e1fPZYe6CULYL8E7fCs3l-PlQ) processor.h, schedule.c, memory.c: Cleanups. --- diff --git a/xen/common/memory.c b/xen/common/memory.c index b7896e3cfb..90c96c0ea5 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -766,32 +766,32 @@ void free_page_type(struct pfn_info *page, unsigned int type) { case PGT_l1_page_table: free_l1_table(page); - if ( unlikely(current->mm.shadow_mode) && - (get_shadow_status(&current->mm, - page-frame_table) & PSH_shadowed) ) - { - /* using 'current-mm' is safe because page type changes only - occur within the context of the currently running domain as - pagetable pages can not be shared across domains. The one - exception is when destroying a domain. However, we get away - with this as there's no way the current domain can have this - mfn shadowed, so we won't get here... Phew! */ - - unshadow_table( page-frame_table, type ); - put_shadow_status(&current->mm); + if ( unlikely(current->mm.shadow_mode) && + (get_shadow_status(&current->mm, + page-frame_table) & PSH_shadowed) ) + { + /* using 'current-mm' is safe because page type changes only + occur within the context of the currently running domain as + pagetable pages can not be shared across domains. The one + exception is when destroying a domain. However, we get away + with this as there's no way the current domain can have this + mfn shadowed, so we won't get here... Phew! 
*/ + + unshadow_table( page-frame_table, type ); + put_shadow_status(&current->mm); } - return; + return; case PGT_l2_page_table: free_l2_table(page); - if ( unlikely(current->mm.shadow_mode) && - (get_shadow_status(&current->mm, - page-frame_table) & PSH_shadowed) ) - { - unshadow_table( page-frame_table, type ); - put_shadow_status(&current->mm); + if ( unlikely(current->mm.shadow_mode) && + (get_shadow_status(&current->mm, + page-frame_table) & PSH_shadowed) ) + { + unshadow_table( page-frame_table, type ); + put_shadow_status(&current->mm); } - return; + return; default: BUG(); @@ -813,7 +813,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val) case MMUEXT_PIN_L2_TABLE: okay = get_page_and_type_from_pagenr( pfn, (cmd == MMUEXT_PIN_L2_TABLE) ? PGT_l2_page_table : - PGT_l1_page_table, + PGT_l1_page_table, CHECK_STRICT); if ( unlikely(!okay) ) { @@ -866,7 +866,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val) shadow_mk_pagetable(&current->mm); - write_ptbase(&current->mm); + write_ptbase(&current->mm); put_page_and_type(&frame_table[old_base_pfn]); } @@ -1005,14 +1005,14 @@ int do_mmu_update(mmu_update_t *ureqs, int count) okay = mod_l1_entry((l1_pgentry_t *)va, mk_l1_pgentry(req.val)); - if ( okay && unlikely(current->mm.shadow_mode) && - (get_shadow_status(&current->mm, page-frame_table) & - PSH_shadowed) ) - { - shadow_l1_normal_pt_update( req.ptr, req.val, - &prev_spfn, &prev_spl1e ); - put_shadow_status(&current->mm); - } + if ( okay && unlikely(current->mm.shadow_mode) && + (get_shadow_status(&current->mm, page-frame_table) & + PSH_shadowed) ) + { + shadow_l1_normal_pt_update( req.ptr, req.val, + &prev_spfn, &prev_spl1e ); + put_shadow_status(&current->mm); + } put_page_type(page); } @@ -1024,13 +1024,13 @@ int do_mmu_update(mmu_update_t *ureqs, int count) mk_l2_pgentry(req.val), pfn); - if ( okay && unlikely(current->mm.shadow_mode) && - (get_shadow_status(&current->mm, page-frame_table) & - PSH_shadowed) ) - { - shadow_l2_normal_pt_update( req.ptr, req.val ); - put_shadow_status(&current->mm); 
- } + if ( okay && unlikely(current->mm.shadow_mode) && + (get_shadow_status(&current->mm, page-frame_table) & + PSH_shadowed) ) + { + shadow_l2_normal_pt_update( req.ptr, req.val ); + put_shadow_status(&current->mm); + } put_page_type(page); } @@ -1041,8 +1041,6 @@ int do_mmu_update(mmu_update_t *ureqs, int count) *(unsigned long *)va = req.val; okay = 1; put_page_type(page); - - // at present, we don't shadowing such pages } break; } @@ -1127,8 +1125,10 @@ int do_update_va_mapping(unsigned long page_nr, if ( unlikely(page_nr >= (HYPERVISOR_VIRT_START >> PAGE_SHIFT)) ) return -EINVAL; - // XXX when we make this support 4MB pages we should also - // deal with the case of updating L2s + /* + * XXX When we make this support 4MB superpages we should also deal with + * the case of updating L2 entries. + */ if ( unlikely(!mod_l1_entry(&linear_pg_table[page_nr], mk_l1_pgentry(val))) ) @@ -1138,21 +1138,21 @@ int do_update_va_mapping(unsigned long page_nr, { unsigned long sval; - l1pte_no_fault( &current->mm, &val, &sval ); + l1pte_no_fault( &current->mm, &val, &sval ); - if ( unlikely(__put_user( sval, ((unsigned long *) (&shadow_linear_pg_table[page_nr])) ) ) ) - { - // Since L2's are guranteed RW, failure indicates the page - // was not shadowed, so ignore. + if ( unlikely(__put_user(sval, ((unsigned long *)( + &shadow_linear_pg_table[page_nr])))) ) + { + /* + * Since L2's are guaranteed RW, failure indicates the page was not + * shadowed, so ignore. 
+ */ perfc_incrc(shadow_update_va_fail); - //MEM_LOG("update_va_map: couldn't write update\n"); - } + } - check_pagetable( p, p->mm.pagetable, "va" ); // debug - + check_pagetable( p, p->mm.pagetable, "va" ); /* debug */ } - deferred_ops = percpu_info[cpu].deferred_ops; percpu_info[cpu].deferred_ops = 0; @@ -1309,7 +1309,7 @@ void audit_all_pages(u_char key, void *dev_id, struct pt_regs *regs) /* check ref count for leaf pages */ if ( ((frame_table[i].type_and_flags & PGT_type_mask) == - PGT_writeable_page) ) + PGT_writeable_page) ) { ref_count = 0; diff --git a/xen/common/schedule.c b/xen/common/schedule.c index 6692bacaa3..c830c648e3 100644 --- a/xen/common/schedule.c +++ b/xen/common/schedule.c @@ -175,7 +175,7 @@ void init_idle_task(void) struct task_struct *p = current; if ( SCHED_OP(alloc_task, p) < 0) - panic("Failed to allocate scheduler private data for idle task"); + panic("Failed to allocate scheduler private data for idle task"); SCHED_OP(add_task, p); spin_lock_irqsave(&schedule_lock[p->processor], flags); @@ -283,13 +283,11 @@ long do_sched_op(unsigned long op) } -/* sched_pause_sync - synchronously pause a domain's execution - -XXXX This is horibly broken -- here just as a place holder at present, - do not use. - -*/ - +/* + * sched_pause_sync - synchronously pause a domain's execution + * XXXX This is horribly broken -- here just as a place holder at present, + * do not use. + */ void sched_pause_sync(struct task_struct *p) { unsigned long flags; @@ -297,22 +295,21 @@ void sched_pause_sync(struct task_struct *p) spin_lock_irqsave(&schedule_lock[cpu], flags); + /* If not the current task, we can remove it from scheduling now. */ if ( schedule_data[cpu].curr != p ) - /* if not the current task, we can remove it from scheduling now */ SCHED_OP(pause, p); p->state = TASK_PAUSED; spin_unlock_irqrestore(&schedule_lock[cpu], flags); - /* spin until domain is descheduled by its local scheduler */ + /* Spin until domain is descheduled by its local scheduler. 
*/ while ( schedule_data[cpu].curr == p ) { - send_hyp_event(p, _HYP_EVENT_NEED_RESCHED ); - do_yield(); + send_hyp_event(p, _HYP_EVENT_NEED_RESCHED ); + do_yield(); } - - + /* The domain will not be scheduled again until we do a wake_up(). */ } diff --git a/xen/include/asm-i386/processor.h b/xen/include/asm-i386/processor.h index 57990d560e..0cb108c3ce 100644 --- a/xen/include/asm-i386/processor.h +++ b/xen/include/asm-i386/processor.h @@ -425,9 +425,9 @@ struct mm_struct { spinlock_t shadow_lock; struct shadow_status *shadow_ht; struct shadow_status *shadow_ht_free; - struct shadow_status *shadow_ht_extras; // extra allocation units + struct shadow_status *shadow_ht_extras; /* extra allocation units */ unsigned int *shadow_dirty_bitmap; - unsigned int shadow_dirty_bitmap_size; // in pages, bit per page + unsigned int shadow_dirty_bitmap_size; /* in pages, bit per page */ unsigned int shadow_page_count; unsigned int shadow_max_page_count; unsigned int shadow_extras_count; @@ -440,20 +440,12 @@ struct mm_struct { static inline void write_ptbase( struct mm_struct *m ) { -/* printk("write_ptbase mode=%08x pt=%08lx st=%08lx\n", - m->shadow_mode, pagetable_val(m->pagetable), - pagetable_val(m->shadow_table) ); - */ - if( m->shadow_mode ) - { - //check_pagetable( m, m->pagetable, "write_ptbase" ); - write_cr3_counted(pagetable_val(m->shadow_table)); - } + if ( unlikely(m->shadow_mode) ) + write_cr3_counted(pagetable_val(m->shadow_table)); else - write_cr3_counted(pagetable_val(m->pagetable)); + write_cr3_counted(pagetable_val(m->pagetable)); } - #define IDLE0_MM \ { \ perdomain_pt: 0, \